skip_cr3:
error |= __vmread(CR4_READ_SHADOW, &old_cr4);
-#if defined (__i386__)
- error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE));
-#else
- error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE | X86_CR4_PAE));
-#endif
+ error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
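
Both call sites now share one invariant: the hardware GUEST_CR4 always has the
host-owned bits forced on, while CR4_READ_SHADOW keeps the value the guest
believes it wrote. A minimal sketch of that shared pattern (the helper name is
hypothetical, not part of this patch):

    /* Hypothetical helper illustrating the GUEST_CR4/CR4_READ_SHADOW split. */
    static inline int vmx_set_guest_cr4(unsigned long cr4)
    {
        int error = 0;

        /* Value the hardware runs with: host-owned bits must stay set. */
        error |= __vmwrite(GUEST_CR4, cr4 | VMX_CR4_HOST_MASK);
        /* Value handed back to the guest when it reads CR4. */
        error |= __vmwrite(CR4_READ_SHADOW, cr4);
        return error;
    }
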
{
/* CR4 */
unsigned long old_guest_cr;
- unsigned long pae_disabled = 0;
__vmread(GUEST_CR4, &old_guest_cr);
if (value & X86_CR4_PAE){
set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
- if(!vmx_paging_enabled(d))
- pae_disabled = 1;
} else {
if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
&d->arch.arch_vmx.cpu_state)){
}
__vmread(CR4_READ_SHADOW, &old_cr);
- if (pae_disabled)
- __vmwrite(GUEST_CR4, value| X86_CR4_VMXE);
- else
- __vmwrite(GUEST_CR4, value| X86_CR4_VMXE);
+ __vmwrite(GUEST_CR4, value | VMX_CR4_HOST_MASK);
__vmwrite(CR4_READ_SHADOW, value);
/*
EXCEPTION_BITMAP_GP )
#endif
+/* These bits in CR4 are owned by the host. */
+#ifdef __i386__
+#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
+#else
+#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
+#endif
+
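
In the VMX architecture, bits set in the VMCS CR4 guest/host mask are
host-owned: a guest read of CR4 returns, for those bit positions, the value
in CR4_READ_SHADOW (and the real GUEST_CR4 value for the rest), while a guest
write that would change an owned bit causes a VM exit. VMX_CR4_HOST_MASK names
those bits in software. A sketch of the guest-visible composition under that
model (hypothetical helper, for illustration only):

    /* CR4 as the guest observes it: host-owned bits come from the
     * read shadow, everything else from the real guest CR4. */
    static inline unsigned long vmx_guest_visible_cr4(unsigned long guest_cr4,
                                                      unsigned long read_shadow)
    {
        return (guest_cr4 & ~VMX_CR4_HOST_MASK) |
               (read_shadow & VMX_CR4_HOST_MASK);
    }
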
#define VMCALL_OPCODE ".byte 0x0f,0x01,0xc1\n"
#define VMCLEAR_OPCODE ".byte 0x66,0x0f,0xc7\n" /* reg/opcode: /6 */
#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
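
These .byte strings hand-encode the VMX instructions for assemblers that
predate the mnemonics; callers splice them into inline asm and follow them
with a ModRM byte supplying the reg/opcode field (the /6 noted above) and the
memory operand. A sketch of how VMCLEAR_OPCODE might be consumed (the
MODRM_EAX_06 macro and the wrapper below are illustrative assumptions, not
part of this hunk; u64 as used elsewhere in these headers):

    /* ModRM 0x30 = mod 00, reg/opcode /6, base EAX: operand is [EAX]. */
    #define MODRM_EAX_06 ".byte 0x30\n"

    /* VMCLEAR m64: EAX holds a pointer to the 64-bit VMCS physical
     * address. Error checking via eflags is omitted for brevity. */
    static inline void __vmpclear(u64 *vmcs_phys_addr)
    {
        __asm__ __volatile__ ( VMCLEAR_OPCODE
                               MODRM_EAX_06
                               :
                               : "a" (vmcs_phys_addr)
                               : "memory" );
    }
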